Another potentially interesting question we can try to answer is how much face representation we see across the task. In order to do so, we’ve trained a linear SVM classifier within subjects on the data from the smoothed FFA localizer to classify signal into faces, objects and scrambles. We can then apply that classifier to various facets of our data. For each of these analyses, we will look at the probability of the classifier predicting a face. If the classifier does indeed predict a face, we score that TR with a “1”, otherwise, it gets a “0”, meaning chance becomes 1/3 = .33.

First, we will apply it to each TR of individual trials. Trials are split into 4 bins based on accuracy and load, and averaged over those measures to create a single time course for each category. The classifier was also applied to each TR of a “template” for each condition. In this analysis, all trials in a given condition were averaged to create a prototypical example for the category. The classifier was then applied to those averages.

We can then look at the probability of classification across subjects. First, we look at it across all subjects, but then we can look at it across our working memory capacity groups.

Finally, we will relate these neural measures to behavior, both averaged over time and for each TR.

library(reshape2)
library(tidyverse)
## ── Attaching packages ──────────────────────────────────────────────────────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.2     ✓ purrr   0.3.4
## ✓ tibble  3.0.3     ✓ dplyr   1.0.1
## ✓ tidyr   1.1.1     ✓ stringr 1.4.0
## ✓ readr   1.3.1     ✓ forcats 0.5.0
## ── Conflicts ─────────────────────────────────────────────────────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()
library(patchwork)

# Behavioral data and working-memory-capacity group assignments.
# NOTE(review): these .RData files appear to supply `constructs_fMRI` and
# `WM_groups`, which are used below -- confirm against the saved objects.
load('data/behav.RData')
load('data/split_groups_info.RData')
load('data/DFR_split_groups_info.RData')

# Helper functions: capacity-group splitting and reshaping of trial-level
# classifier output into long format for plotting.
source("helper_fxns/split_into_groups.R")
source('helper_fxns/prep_trial_levels_for_plot.R')
source("helper_fxns/split_trial_type.R")

# Standard error of the mean, ignoring missing values.
#
# @param x A numeric vector, possibly containing NA.
# @return sd(x, na.rm = TRUE) / sqrt(n), where n is the number of
#   non-missing elements of x. Returns NA if fewer than 2 values remain.
se <- function(x) {
  # sum(!is.na(x)) is the idiomatic count of non-missing values
  # (equivalent to length(x[!is.na(x)]) but without the copy).
  n <- sum(!is.na(x))
  sd(x, na.rm = TRUE) / sqrt(n)
}
# Classifier performance information: per-subject cross-validation accuracy
# and the SVM cost (C) value selected for each fold.
clf_acc <- read.csv('data/MVPA/HPC_unsmoothed/clf_acc.csv')
best_c <- read.csv('data/MVPA/HPC_unsmoothed/best_C.csv')

# Face-probability time courses from the condition "templates" (all trials in
# a condition averaged before classification); one row per subject, columns
# V1-V14 presumably correspond to TRs 1-14 -- confirm against the CSV writer.
averages_from_template <- list(high_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_correct_avg.csv',header=FALSE), 
                               high_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_incorrect_avg.csv',header=FALSE), 
                               low_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_correct_avg.csv',header=FALSE), 
                               low_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_incorrect_avg.csv',header=FALSE))

# Correct - incorrect difference time course within each load level
# (columns 1:14 are the TR columns).
averages_from_template[["high_load_correct_diff"]] <- averages_from_template[["high_correct"]][,1:14] - averages_from_template[["high_incorrect"]][,1:14]
averages_from_template[["low_load_correct_diff"]] <- averages_from_template[["low_correct"]][,1:14] - averages_from_template[["low_incorrect"]][,1:14]

# Face probabilities from individual trials: the classifier was applied to
# each TR of each trial, then probabilities were averaged within condition.
individual_trial_averages_probs <- list(
  high_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_correct_indiv_avg_probs.csv',header=FALSE),
  high_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_incorrect_indiv_avg_probs.csv',header=FALSE),
  low_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_correct_indiv_avg_probs.csv',header=FALSE),
  low_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_incorrect_indiv_avg_probs.csv',header=FALSE)) 

# Correct - incorrect probability difference within each load level.
individual_trial_averages_probs[["high_load_correct_diff"]] <- individual_trial_averages_probs[["high_correct"]][,1:14] - individual_trial_averages_probs[["high_incorrect"]][,1:14]
individual_trial_averages_probs[["low_load_correct_diff"]] <- individual_trial_averages_probs[["low_correct"]][,1:14] - individual_trial_averages_probs[["low_incorrect"]][,1:14]

# Binary face predictions from individual trials (1 = face predicted,
# 0 = not), averaged within condition -- so values are proportions.
individual_trial_averages_preds <- list(
  high_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_correct_indiv_avg_preds.csv',header=FALSE),
  high_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_incorrect_indiv_avg_preds.csv',header=FALSE),
  low_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_correct_indiv_avg_preds.csv',header=FALSE),
  low_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_incorrect_indiv_avg_preds.csv',header=FALSE)) 

# Correct - incorrect prediction-rate difference within each load level.
individual_trial_averages_preds[["high_load_correct_diff"]] <- individual_trial_averages_preds[["high_correct"]][,1:14] - individual_trial_averages_preds[["high_incorrect"]][,1:14]

individual_trial_averages_preds[["low_load_correct_diff"]] <- individual_trial_averages_preds[["low_correct"]][,1:14] - individual_trial_averages_preds[["low_incorrect"]][,1:14]


# Clean up the raw classifier output for all six conditions:
#   1. keep rows 1:9 and 11:170, i.e. drop row 10 (one subject excluded --
#      presumably for missing data; TODO confirm why),
#   2. recode NaN to NA in each of the 14 TR columns (NaN arises when a
#      subject has no trials in a condition),
#   3. attach subject IDs from constructs_fMRI (loaded from .RData above).
averages_from_template2 <- list()
indiv_probs <- list()
indiv_preds <- list()

for (i in seq.int(1,6)){
  # All three lists were built with the same six condition names, so the
  # names of averages_from_template index the preds/probs lists too.
  averages_from_template2[[names(averages_from_template)[i]]] <- averages_from_template[[i]][c(1:9,11:170),]
  indiv_preds[[names(averages_from_template)[i]]] <- individual_trial_averages_preds[[i]][c(1:9,11:170),]
  indiv_probs[[names(averages_from_template)[i]]] <- individual_trial_averages_probs[[i]][c(1:9,11:170),]
  for (ii in seq.int(1,14)){
    
    # is.na() would also catch NaN, but NaN is replaced explicitly so that
    # downstream na.rm logic treats empty conditions as missing.
    averages_from_template2[[i]][is.nan(averages_from_template2[[i]][,ii]),ii] <- NA
    indiv_probs[[i]][is.nan(indiv_probs[[i]][,ii]),ii] <- NA
    indiv_preds[[i]][is.nan(indiv_preds[[i]][,ii]),ii] <- NA
    
  }
  # PTID rows are subset with the same index vector, keeping IDs aligned
  # with the retained subject rows.
  averages_from_template2[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
  indiv_probs[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
  indiv_preds[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
  
}

# Replace the raw lists with the cleaned versions and drop the temporaries.
averages_from_template <- averages_from_template2
individual_trial_averages_preds <- indiv_preds
individual_trial_averages_probs <- indiv_probs

rm(averages_from_template2)
rm(indiv_preds)
rm(indiv_probs)

# Cache the cleaned objects so downstream notebooks can load them directly.
save(list=c("clf_acc", "best_c", "averages_from_template", "individual_trial_averages_probs","individual_trial_averages_preds"), file = "data/MVPA_HPC_unsmoothed.RData")

Probability of classifying faces

On average, we were able to classify faces with 42.2% accuracy (statistically significantly greater than the chance level of 1/3 ≈ 33.3%). The classifier was trained on data from an independent FFA localizer. Data were extracted from the bilateral hippocampus. From that mask, the top 100 voxels based on the faces vs objects contrast in the overall subject GLM were selected as features for the classifier. The data used to train the classifier were shifted by 4.5 seconds to account for the hemodynamic delay.

A linear SVM classifier was used; the localizer task was split into 6 blocks of stimuli. These blocks were used in a pre-defined split for cross validation, where one block of each stimulus type was held out as a test set. Data were normalized within the training and test sets separately. Within this training set, another cross validation process was repeated to tune the cost of the model over the values [0.01, 0.1, 1, 10]. The best value of the cost function was used for each cross validation to score the classifier on the test set. The best classifier was also used to predict face presence at each TR during the DFR task.

# Average classification accuracy across CV folds for each subject, then
# test whether the group mean differs from chance (1/3 = 0.33).
clf_acc$average <- rowMeans(clf_acc, na.rm = TRUE)
t.test(clf_acc$average,mu=0.33)
## 
##  One Sample t-test
## 
## data:  clf_acc$average
## t = 13.361, df = 167, p-value < 2.2e-16
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
##  0.4087798 0.4360975
## sample estimates:
## mean of x 
## 0.4224387
# Reshape each list of per-subject time courses into long format
# (one row per subject x condition x TR) for plotting.
template_preds_melt <- prep_trial_levels_for_plot(averages_from_template)
## Using level as id variables
individual_trial_probs_melt <- prep_trial_levels_for_plot(individual_trial_averages_probs)
## Using level as id variables
individual_trial_preds_melt <- prep_trial_levels_for_plot(individual_trial_averages_preds)
## Using level as id variables

All subjects

The shape of the time course is different here than it was for the fusiform region - here, we’re well below chance for encoding, but start to see a significant probability during delay (starting around TR 8) and the probe.

From individual trials

Here, we’re seeing a similar pattern to the fusiform, where we see peaks of decoding accuracy around the encoding period and then probe period. However, unlike the fusiform, we’re also seeing above chance accuracy for all trial types during the delay period. We also see that during encoding, high load trials (regardless of accuracy) show a higher probability of having a face decoded than low load trials. There are no differences between trial types, however, during probe.

# Time course of face-classification probability for each of the four
# load x accuracy conditions, with SE ribbons and a dotted chance line.
raw_trial_levels <- c("high_correct", "high_incorrect", "low_correct", "low_incorrect")
indiv_probs_plot_data <- individual_trial_probs_melt %>%
  filter(level %in% raw_trial_levels)

ggplot(indiv_probs_plot_data, aes(x = TR, y = value)) +
  geom_line(aes(color = level)) +
  geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
  geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = level), alpha = 0.2) +
  scale_x_continuous(breaks = 1:14, labels = 1:14) +
  ylab("Probability of classifier predicting a face") +
  theme_classic()

# Delay-period (TR 8) face probability vs chance: high load, correct trials.
t.test(individual_trial_averages_probs[["high_correct"]]$V8,mu=0.33)
## 
##  One Sample t-test
## 
## data:  individual_trial_averages_probs[["high_correct"]]$V8
## t = 5.7612, df = 168, p-value = 3.896e-08
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
##  0.3538600 0.3787366
## sample estimates:
## mean of x 
## 0.3662983
# Delay-period (TR 8) face probability vs chance: high load, incorrect trials.
t.test(individual_trial_averages_probs[["high_incorrect"]]$V8,mu=0.33)
## 
##  One Sample t-test
## 
## data:  individual_trial_averages_probs[["high_incorrect"]]$V8
## t = 3.5624, df = 168, p-value = 0.0004783
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
##  0.3472266 0.3900519
## sample estimates:
## mean of x 
## 0.3686392
# Delay-period (TR 8) face probability vs chance: low load, correct trials.
t.test(individual_trial_averages_probs[["low_correct"]]$V8,mu=0.33)
## 
##  One Sample t-test
## 
## data:  individual_trial_averages_probs[["low_correct"]]$V8
## t = 7.5667, df = 168, p-value = 2.4e-12
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
##  0.3611637 0.3831657
## sample estimates:
## mean of x 
## 0.3721647
# Delay-period (TR 8) face probability vs chance: low load, incorrect trials
# (smaller df here reflects subjects with no low-load incorrect trials).
t.test(individual_trial_averages_probs[["low_incorrect"]]$V8,mu=0.33)
## 
##  One Sample t-test
## 
## data:  individual_trial_averages_probs[["low_incorrect"]]$V8
## t = 1.8063, df = 111, p-value = 0.07358
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
##  0.3254135 0.4291133
## sample estimates:
## mean of x 
## 0.3772634
# Load effect at encoding (TR 6): collapse over accuracy within each load,
# then compare high vs low load with a paired t-test.
encoding_level_avg <- data.frame(high = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6, individual_trial_averages_probs[["high_incorrect"]]$V6), na.rm=TRUE), low = rowMeans(cbind(individual_trial_averages_probs[["low_correct"]]$V6, individual_trial_averages_probs[["low_incorrect"]]$V6),na.rm=TRUE))

t.test(encoding_level_avg$high,encoding_level_avg$low,paired=TRUE)
## 
##  Paired t-test
## 
## data:  encoding_level_avg$high and encoding_level_avg$low
## t = 2.7748, df = 168, p-value = 0.006149
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  0.008899244 0.052786079
## sample estimates:
## mean of the differences 
##              0.03084266
# Accuracy effect at encoding (TR 6): collapse over load within each
# accuracy level, then compare correct vs incorrect with a paired t-test.
encoding_acc_avg <- data.frame(correct = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6, individual_trial_averages_probs[["low_correct"]]$V6), na.rm=TRUE), incorrect = rowMeans(cbind(individual_trial_averages_probs[["low_incorrect"]]$V6, individual_trial_averages_probs[["high_incorrect"]]$V6),na.rm=TRUE))

t.test(encoding_acc_avg$correct,encoding_acc_avg$incorrect,paired=TRUE)
## 
##  Paired t-test
## 
## data:  encoding_acc_avg$correct and encoding_acc_avg$incorrect
## t = 0.19391, df = 168, p-value = 0.8465
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.02339640  0.02849327
## sample estimates:
## mean of the differences 
##             0.002548436
# Probe-period (TR 11) probabilities by trial type. NOTE(review):
# low_incorrect is omitted here -- presumably because of missingness in
# that cell (see df = 111 above); confirm this was intentional.
probe_data_indiv <- data.frame(high_correct=individual_trial_averages_probs[["high_correct"]]$V11, high_incorrect = individual_trial_averages_probs[["high_incorrect"]]$V11, low_correct = individual_trial_averages_probs[["low_correct"]]$V11)
probe_data_indiv <- melt(probe_data_indiv)
## No id variables; using all as measure variables
# One-way ANOVA comparing the three trial types at probe.
probe.aov <- aov(value ~ variable, data = probe_data_indiv)
summary(probe.aov)
##              Df Sum Sq  Mean Sq F value Pr(>F)
## variable      2  0.007 0.003332   0.292  0.747
## Residuals   504  5.743 0.011394
# Pairwise trial-type comparisons with family-wise error correction.
TukeyHSD(probe.aov)
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = value ~ variable, data = probe_data_indiv)
## 
## $variable
##                                      diff         lwr        upr     p adj
## high_incorrect-high_correct -0.0006518918 -0.02794820 0.02664442 0.9982640
## low_correct-high_correct    -0.0079959026 -0.03529221 0.01930041 0.7702732
## low_correct-high_incorrect  -0.0073440108 -0.03464032 0.01995230 0.8023281

It seems like there’s really not much difference between correct and incorrect trials in the hippocampus.

# Correct - incorrect difference time course within each load level,
# with a dotted zero line for reference.
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
  geom_line(aes(x=TR,y=0), linetype="dotted")+
  geom_line(aes(color=level))+
  geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
  scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
  ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
  theme_classic()

From templates

In the templates, we see a similar structure as in the individual trials with peaks around encoding and probe, though there is below chance decoding during delay period. There are no differences between trial types during probe.

# Template-based time courses for the four load x accuracy conditions,
# with SE ribbons and a dotted chance line at 0.33.
ggplot(data=template_preds_melt%>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
  geom_line(aes(color=level))+
  geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
  geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
  scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
  ylab("Probability of classifier predicting a face")+
  theme_classic()

# Accuracy effect at probe (TR 10) from the templates. NOTE(review): the
# "correct" side averages both loads but "incorrect" uses high load only --
# presumably because low_incorrect is too sparse; confirm intent.
acc_data_probe <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,averages_from_template[["low_correct"]]$V10)), incorrect = averages_from_template[["high_incorrect"]]$V10)
t.test(acc_data_probe$correct,acc_data_probe$incorrect, paired=TRUE)
## 
##  Paired t-test
## 
## data:  acc_data_probe$correct and acc_data_probe$incorrect
## t = 1.0108, df = 168, p-value = 0.3136
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.02819852  0.08737012
## sample estimates:
## mean of the differences 
##               0.0295858
# Same accuracy comparison at the late probe TR (11); asymmetric
# correct/incorrect definition as above.
acc_data_late_probe <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V11,averages_from_template[["low_correct"]]$V11)), incorrect = averages_from_template[["high_incorrect"]]$V11)
t.test(acc_data_late_probe$correct,acc_data_late_probe$incorrect, paired=TRUE)
## 
##  Paired t-test
## 
## data:  acc_data_late_probe$correct and acc_data_late_probe$incorrect
## t = 1.3439, df = 168, p-value = 0.1808
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.02081148  0.10956887
## sample estimates:
## mean of the differences 
##               0.0443787
# Probe-period (TR 11) ANOVA across the three trial types, from the
# template-based probabilities (low_incorrect again omitted).
probe_data_template <- data.frame(high_correct=averages_from_template[["high_correct"]]$V11, high_incorrect = averages_from_template[["high_incorrect"]]$V11, low_correct = averages_from_template[["low_correct"]]$V11)
probe_data_template <- melt(probe_data_template)
## No id variables; using all as measure variables
probe.aov <- aov(value ~ variable, data = probe_data_template)
summary(probe.aov)
##              Df Sum Sq Mean Sq F value Pr(>F)
## variable      2   0.46  0.2308    1.62  0.199
## Residuals   504  71.78  0.1424
# Pairwise trial-type comparisons for the template ANOVA.
TukeyHSD(probe.aov)
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = value ~ variable, data = probe_data_template)
## 
## $variable
##                                    diff         lwr        upr     p adj
## high_incorrect-high_correct -0.07100592 -0.16751283 0.02550100 0.1951783
## low_correct-high_correct    -0.05325444 -0.14976135 0.04325248 0.3974333
## low_correct-high_incorrect   0.01775148 -0.07875544 0.11425839 0.9021177

Unlike in the other regions, there is no difference in the overall probability of predicting a face from the template vs individual trials.

# Compare overall probe (TR 10) face probability between the template and
# individual-trial analyses (same three conditions averaged per subject),
# using a paired Wilcoxon signed-rank test.
compare_across_temp_indiv <- data.frame(template = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,
                                                                  averages_from_template[["high_incorrect"]]$V10,
                                                                  averages_from_template[["low_correct"]]$V10)),
                                        indiv = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V10,
                                                               individual_trial_averages_probs[["high_incorrect"]]$V10,
                                                               individual_trial_averages_probs[["low_correct"]]$V10)))

wilcox.test(compare_across_temp_indiv$template, compare_across_temp_indiv$indiv,paired=TRUE)
## 
##  Wilcoxon signed rank test with continuity correction
## 
## data:  compare_across_temp_indiv$template and compare_across_temp_indiv$indiv
## V = 6696, p-value = 0.4455
## alternative hypothesis: true location shift is not equal to 0

Similar to before, there aren’t any differences between correct and incorrect trials from the template.

# Correct - incorrect difference time course from the templates, with a
# dotted zero line for reference.
ggplot(data=template_preds_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
  geom_line(aes(color=level))+
  geom_line(aes(x=TR,y=0), linetype="dotted")+
  geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
  scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
  ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
  theme_classic()

# High-load correct vs incorrect from the templates at encoding (TR 6)
# and at probe (TR 11), tested with paired t-tests.
encoding_correct_diff_high <- data.frame(correct=averages_from_template[["high_correct"]]$V6, incorrect=averages_from_template[["high_incorrect"]]$V6)

probe_correct_diff_high <- data.frame(correct=averages_from_template[["high_correct"]]$V11, incorrect=averages_from_template[["high_incorrect"]]$V11)

t.test(encoding_correct_diff_high$correct, encoding_correct_diff_high$incorrect, paired=TRUE)
## 
##  Paired t-test
## 
## data:  encoding_correct_diff_high$correct and encoding_correct_diff_high$incorrect
## t = 1.4479, df = 168, p-value = 0.1495
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.01935779  0.12586667
## sample estimates:
## mean of the differences 
##              0.05325444
# High-load correct vs incorrect at probe (TR 11).
t.test(probe_correct_diff_high$correct, probe_correct_diff_high$incorrect, paired=TRUE)
## 
##  Paired t-test
## 
## data:  probe_correct_diff_high$correct and probe_correct_diff_high$incorrect
## t = 1.811, df = 168, p-value = 0.07193
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
##  -0.006399565  0.148411399
## sample estimates:
## mean of the differences 
##              0.07100592

By Working Memory Capacity Groups

# Split each set of time courses into working-memory-capacity groups
# (high / med / low, per WM_groups) using the split_trial_type() helper.
split_template <- split_trial_type(averages_from_template,WM_groups)
split_indiv_probs <- split_trial_type(individual_trial_averages_probs, WM_groups)
split_indiv_preds <- split_trial_type(individual_trial_averages_preds, WM_groups)

From Indiv Trials

In high load incorrect trials, we see that medium capacity subjects show greater face classification than both low and high capacity subjects during encoding, and greater than low capacity subjects during low load correct trials. We also see that in low load incorrect trials, low capacity subjects show stronger face classification than medium capacity subjects.

# Build one probability time-course panel per trial type (four in total),
# each showing the three capacity groups with SE ribbons and a dotted
# chance line at 0.33.
indiv_avgs <- lapply(seq.int(1, 4), function(i) {
  ggplot(data = split_indiv_probs[["avgs"]][[i]][["all"]]) +
    geom_line(aes(x = TR, y = mean, color = group)) +
    geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = group), alpha = 0.2) +
    geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(names(split_indiv_probs[["avgs"]])[i]) +
    ylab("Probability") +
    theme_classic()
})

# Arrange the four panels in a 2 x 2 grid with a shared legend (patchwork).
(indiv_avgs[[1]] + indiv_avgs[[2]]) / (indiv_avgs[[3]] + indiv_avgs[[4]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title = "Probability of classifier predicting a face from individual trials")

# Capacity-group ANOVA at encoding for each trial type.
# NOTE(review): column 6 appears to be the encoding TR (V6) and column 16
# the capacity-group factor -- confirm against split_trial_type() output.
print("encoding")
## [1] "encoding"
for (trial_type in seq.int(1,4)){ 
  print(names(split_indiv_probs[["all_data"]])[trial_type])
  temp.aov <- aov(split_indiv_probs[["all_data"]][[trial_type]][["all"]][,6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][,16])
  print(summary(temp.aov))
  print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
##                                                               Df Sum Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2 0.0077
## Residuals                                                    164 1.8799
##                                                               Mean Sq F value
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.003856   0.336
## Residuals                                                    0.011463        
##                                                              Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]  0.715
## Residuals                                                          
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                  diff         lwr        upr     p adj
## med-high  0.009552243 -0.03830343 0.05740792 0.8845015
## low-high -0.007048554 -0.05512126 0.04102415 0.9358866
## low-med  -0.016600798 -0.06467350 0.03147191 0.6931879
## 
## [1] "high_incorrect"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2  0.305 0.15254
## Residuals                                                    164  3.813 0.02325
##                                                              F value  Pr(>F)   
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]    6.56 0.00182 **
## Residuals                                                                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                  diff         lwr         upr     p adj
## med-high  0.085321173  0.01716236  0.15347998 0.0098155
## low-high -0.009730253 -0.07819817  0.05873767 0.9396470
## low-med  -0.095051426 -0.16351935 -0.02658351 0.0035708
## 
## [1] "low_correct"
##                                                               Df Sum Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2 0.0519
## Residuals                                                    164 0.9826
##                                                               Mean Sq F value
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.025948   4.331
## Residuals                                                    0.005991        
##                                                              Pr(>F)  
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.0147 *
## Residuals                                                            
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr          upr     p adj
## med-high  0.02436155 -0.01023583  0.058958923 0.2216441
## low-high -0.01874507 -0.05349935  0.016009215 0.4109210
## low-med  -0.04310661 -0.07786089 -0.008352332 0.0106437
## 
## [1] "low_incorrect"
##                                                               Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   2  0.474 0.23680
## Residuals                                                    107  6.638 0.06203
##                                                              F value Pr(>F)  
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]   3.817  0.025 *
## Residuals                                                                    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 57 observations deleted due to missingness
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr        upr     p adj
## med-high -0.05007852 -0.19607937 0.09592233 0.6944356
## low-high  0.10141885 -0.03739146 0.24022917 0.1964224
## low-med   0.15149737  0.01742124 0.28557350 0.0226681

From templates

There are no significant group differences at encoding from the template, but there is a trend towards medium capacity subjects showing stronger face classification evidence during encoding in high load trials, regardless of accuracy.

# One probability-vs-TR panel per load/accuracy condition, built from the
# group-averaged template classification probabilities.
template_avgs <- lapply(seq_len(4), function(cond) {
  cond_avgs <- split_template[["avgs"]][[cond]][["all"]]
  ggplot(data = cond_avgs) +
    geom_line(aes(x = TR, y = mean, color = group)) +
    geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = group), alpha = 0.2) +
    # dotted reference line at chance (1/3) for the three-way classifier
    geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(names(split_template[["avgs"]])[cond]) +
    ylab("Probability") +
    theme_classic()
})

# Lay the four condition panels out in a 2x2 grid with one shared legend.
(template_avgs[[1]] + template_avgs[[2]]) / (template_avgs[[3]] + template_avgs[[4]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title = "Probability of classifier predicting a face from trial templates")

# One-way ANOVA + Tukey HSD per condition: does capacity group (column 16)
# predict face-classification probability (column 6) in the trial templates?
# NOTE(review): columns 6 and 16 are positional — presumably the TR-6
# probability and the capacity-group factor; confirm against how
# split_template is constructed upstream.
# The full list expressions are deliberately left inline so the printed
# aov/Tukey output labels the model terms exactly as in the rendered report.
for (trial_type in seq.int(1,4)){ 
  print(names(split_template[["all_data"]])[trial_type])
  temp.aov <- aov(split_template[["all_data"]][[trial_type]][["all"]][,6] ~ split_template[["all_data"]][[trial_type]][["all"]][,16])
  print(summary(temp.aov))
  print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2  0.402  0.2009
## Residuals                                                 164 23.517  0.1434
##                                                           F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   1.401  0.249
## Residuals                                                               
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff        lwr        upr     p adj
## med-high  0.04761905 -0.1216413 0.21687936 0.7838615
## low-high -0.07196970 -0.2419976 0.09805824 0.5771926
## low-med  -0.11958874 -0.2896167 0.05043920 0.2223898
## 
## [1] "high_incorrect"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2  0.759  0.3797
## Residuals                                                 164 23.208  0.1415
##                                                           F value Pr(>F)  
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2.683 0.0714 .
## Residuals                                                                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr         upr     p adj
## med-high  0.08333333 -0.08481157 0.251478236 0.4714580
## low-high -0.08208874 -0.25099621 0.086818724 0.4851662
## low-med  -0.16542208 -0.33432955 0.003485391 0.0563252
## 
## [1] "low_correct"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2   0.78  0.3902
## Residuals                                                 164  23.68  0.1444
##                                                           F value Pr(>F)  
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2.702   0.07 .
## Residuals                                                                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##                 diff         lwr        upr     p adj
## med-high  0.13095238 -0.03888809 0.30079285 0.1650727
## low-high -0.02472944 -0.19534017 0.14588129 0.9372965
## low-med  -0.15568182 -0.32629255 0.01492891 0.0815924
## 
## [1] "low_incorrect"
##                                                            Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16]   2      0       0
## Residuals                                                 107      0       0
##                                                           F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16]               
## Residuals                                                               
## 57 observations deleted due to missingness
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
## 
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
##          diff lwr upr p adj
## med-high    0   0   0   NaN
## low-high    0   0   0   NaN
## low-med     0   0   0   NaN

Correlation with Behavior

Individual Trials

Averaged over time

Averaged over time, we see a significant negative correlation between omnibus span and classification at low load incorrect trials, and a positive correlation with the difference between correct and incorrect trials at low load. We also see a significant negative correlation between BPRS and classification at high load incorrect trials, and a positive correlation with the difference between correct and incorrect trials at high load.

# Average each subject's face-classification probability over all 14 TRs,
# one column per load/accuracy condition plus the correct-incorrect
# difference scores for each load.
indiv_avg_over_time <- data.frame(
  high_correct   = rowMeans(individual_trial_averages_probs[["high_correct"]][, 1:14]),
  high_incorrect = rowMeans(individual_trial_averages_probs[["high_incorrect"]][, 1:14]),
  low_correct    = rowMeans(individual_trial_averages_probs[["low_correct"]][, 1:14]),
  # Low-load incorrect trials are missing for some subjects (57 NAs appear in
  # the plots below), so drop NAs when averaging this condition.
  low_incorrect  = rowMeans(individual_trial_averages_probs[["low_incorrect"]][, 1:14], na.rm = TRUE),
  high_load_diff_correct = rowMeans(individual_trial_averages_probs[["high_load_correct_diff"]][, 1:14]),
  low_load_diff_correct  = rowMeans(individual_trial_averages_probs[["low_load_correct_diff"]][, 1:14])
)

# The original code had `indiv_avg_over_time[is.na(indiv_avg_over_time)] <- NA`
# here, which is a no-op (it assigns NA over existing NAs); removed.

# Row 10 of the construct table is excluded throughout this section.
# NOTE(review): presumably a subject without DFR data — confirm with the
# exclusion list used elsewhere in the pipeline.
kept_subjects <- c(1:9, 11:170)
indiv_avg_over_time <- data.frame(
  indiv_avg_over_time,
  omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI[kept_subjects],
  PTID = constructs_fMRI$PTID[kept_subjects]
)
# Attach symptom severity (BPRS) and DFR accuracy at both loads by subject ID.
indiv_avg_over_time <- merge(
  indiv_avg_over_time,
  p200_data[, c("PTID", "BPRS_TOT", "XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],
  by = "PTID"
)


plot_list <- list()

# Scatter each time-averaged probability measure (columns 2:7) against the
# behavioral measures; columns 8:11 hold span, PTID-adjacent behavior, BPRS,
# and DFR accuracy as merged above.
for (measure_idx in seq_len(6)) {
  scatter_data <- indiv_avg_over_time[, c(measure_idx + 1, 8:11)]
  colnames(scatter_data)[1] <- "prob"
  panel_title <- colnames(indiv_avg_over_time)[measure_idx + 1]

  # Probability vs omnibus working-memory span.
  plot_list[["omnibus"]][[measure_idx]] <- ggplot(data = scatter_data, aes(x = prob, y = omnibus_span)) +
    geom_point() +
    stat_smooth(method = "lm") +
    xlab("Probability") +
    ggtitle(panel_title) +
    theme_classic()

  # Probability vs high-load DFR accuracy.
  plot_list[["DFR_Acc"]][[measure_idx]] <- ggplot(data = scatter_data, aes(x = prob, y = XDFR_MRI_ACC_L3)) +
    geom_point() +
    stat_smooth(method = "lm") +
    xlab("Probability") +
    ggtitle(panel_title) +
    theme_classic()

  # Probability vs symptom severity (BPRS total).
  plot_list[["BPRS"]][[measure_idx]] <- ggplot(data = scatter_data, aes(x = prob, y = BPRS_TOT)) +
    geom_point() +
    stat_smooth(method = "lm") +
    xlab("Probability") +
    ggtitle(panel_title) +
    theme_classic()
}

(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
  (plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
  (plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
  (plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

# Follow-up tests for the relationships visible in the scatter plots above.

# Lower span subjects show MORE face evidence on low-load incorrect trials
# (r = -.35): more face processing when the trial is nonetheless failed.
cor.test(indiv_avg_over_time$low_incorrect, indiv_avg_over_time$omnibus_span)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$low_incorrect and indiv_avg_over_time$omnibus_span
## t = -3.8905, df = 110, p-value = 0.0001717
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.5010112 -0.1734237
## sample estimates:
##        cor 
## -0.3477877
# Higher span goes with a larger correct-minus-incorrect difference at low
# load (r = .33): classification evidence tracks success more tightly.
cor.test(indiv_avg_over_time$low_load_diff_correct, indiv_avg_over_time$omnibus_span)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$low_load_diff_correct and indiv_avg_over_time$omnibus_span
## t = 3.6406, df = 110, p-value = 0.0004162
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.1515911 0.4840266
## sample estimates:
##       cor 
## 0.3279229
# Higher symptom severity goes with less face evidence on high-load
# incorrect trials (r = -.25).
cor.test(indiv_avg_over_time$high_incorrect, indiv_avg_over_time$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$high_incorrect and indiv_avg_over_time$BPRS_TOT
## t = -3.2945, df = 167, p-value = 0.001204
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.38368390 -0.09979368
## sample estimates:
##        cor 
## -0.2470322
# ...and with a larger correct-minus-incorrect difference at high load
# (r = .26), mirroring the previous result.
cor.test(indiv_avg_over_time$high_load_diff_correct, indiv_avg_over_time$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  indiv_avg_over_time$high_load_diff_correct and indiv_avg_over_time$BPRS_TOT
## t = 3.4236, df = 167, p-value = 0.0007775
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.1093587 0.3919004
## sample estimates:
##      cor 
## 0.256091

All subjects

Across time

If we look at the patterns over time, we can see that BPRS tends to be negatively related to classification during the encoding and probe periods. Correlation with accuracy is strongest during the encoding period. Span generally has a low correlation with classification probability.

Next step is to pull out some of these correlations and see if they’re significant.

# Combine construct scores, task behavior, and clinical z-scores per subject.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")

# Drop row 10 (the subject excluded throughout this section) and keep only
# the columns used below.
# NOTE(review): the indices c(1,7,14,15,41,42) are positional — presumably
# PTID, omnibus span, the two DFR accuracy columns, and BPRS. They will
# silently go stale if the shape of any merged table changes; confirm.
data_to_plot <- data_to_plot[c(1:9,11:170),c(1,7,14,15,41,42)]
# Load effect on accuracy: high load (L3) minus low load (L1).
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1

corr_to_behav_plots <- list()

# For each condition, correlate every behavioral measure (data_to_plot
# columns 2:5) with the classification probability at each of the 14 TRs,
# then plot the resulting correlation time courses.
for (cond in seq_len(6)) {
  cond_name <- names(individual_trial_averages_probs)[cond]
  behav_by_TR <- data.frame(matrix(nrow = 4, ncol = 14))

  for (behav_col in seq.int(2, 5)) {
    behav_by_TR[behav_col - 1, ] <- vapply(
      seq_len(14),
      function(tr) {
        cor(data_to_plot[, behav_col],
            individual_trial_averages_probs[[cond]][, tr],
            use = "pairwise.complete.obs")
      },
      numeric(1)
    )
  }

  # Reshape to one row per TR, one column per behavioral measure.
  behav_by_TR <- data.frame(t(behav_by_TR))
  behav_by_TR$TR <- seq.int(1, 14)
  colnames(behav_by_TR)[1:4] <- colnames(data_to_plot)[2:5]

  melted_behav_by_TR <- melt(behav_by_TR, id.vars = "TR")

  corr_to_behav_plots[[cond_name]] <- ggplot(data = melted_behav_by_TR, aes(x = TR, y = value)) +
    geom_line(aes(color = variable)) +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(cond_name) +
    theme_classic()
}
# Four main conditions in a 2x2 grid, shared legend.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title = "Correlation between face classification and behavioral measures")

# The two correct-minus-incorrect difference conditions side by side.
(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title = "Correlation between difference across correctness in face classification and behavioral measures")

plot_list <- list()

# Build one scatter plot of classification probability at a given TR column
# against a behavioral measure. Extracted into a helper because the original
# repeated the same ggplot boilerplate nine times per trial type
# (3 behavioral measures x 3 task periods).
#
# plot_data    -- merged per-subject data frame for one condition
# tr_col       -- name of the TR probability column ("V6", "V8", or "V11")
# behav_col    -- name of the behavioral column for the x axis
# panel_title  -- condition name shown as the panel title
make_behav_scatter <- function(plot_data, tr_col, behav_col, panel_title) {
  ggplot(data = plot_data, aes(y = .data[[tr_col]], x = .data[[behav_col]])) +
    geom_point() +
    stat_smooth(method = "lm") +
    ylab("Probability") +
    xlab(behav_col) +  # keep the same x-axis label as the original bare aes()
    ggtitle(panel_title) +
    theme_classic()
}

# plot_list key -> behavioral column plotted on the x axis.
behav_measures <- c(omnibus = "omnibus_span_no_DFR_MRI",
                    L3_Acc  = "XDFR_MRI_ACC_L3",
                    BPRS    = "BPRS_TOT")
# Task-period key -> TR column used for that period.
# NOTE(review): V6/V8/V11 presumably correspond to cue/encoding, delay, and
# probe TRs respectively — confirm against the trial timing.
periods <- c(cue = "V6", delay = "V8", probe = "V11")

for (trial_type in seq_len(6)) {
  # temp_plot_data intentionally survives the loop: the cor.tests below reuse
  # the last iteration's merge (same behavioral columns for every condition).
  temp_plot_data <- merge(p200_data, individual_trial_averages_probs[[trial_type]], by = "PTID")
  temp_plot_data <- merge(temp_plot_data, constructs_fMRI, by = "PTID")
  panel_title <- names(individual_trial_averages_probs)[trial_type]

  for (measure in names(behav_measures)) {
    for (period in names(periods)) {
      plot_list[[measure]][[period]][[trial_type]] <-
        make_behav_scatter(temp_plot_data,
                           periods[[period]],
                           behav_measures[[measure]],
                           panel_title)
    }
  }
}

Encoding

There is a significant positive relationship between accuracy and decoding probability in incorrect high load trials. We also see significant correlations between omnibus span and the correct/incorrect difference at low load. There is a trend towards a negative correlation between BPRS and classification at high load incorrect trials.

(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /  
  (plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]])  +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /  
  (plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]])  +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /  
  (plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]])  +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

cor.test(individual_trial_averages_probs[["low_correct"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_correct"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 0.31101, df = 167, p-value = 0.7562
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.1273628  0.1743865
## sample estimates:
##        cor 
## 0.02405982
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_load_correct_diff"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 2.9287, df = 110, p-value = 0.004138
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.08777877 0.43290510
## sample estimates:
##       cor 
## 0.2689539
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_incorrect"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.0207, df = 167, p-value = 0.0449
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.003616322 0.298490607
## sample estimates:
##      cor 
## 0.154492
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V6,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_incorrect"]]$V6 and temp_plot_data$BPRS_TOT
## t = -1.8795, df = 167, p-value = 0.06191
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.288617034  0.007188321
## sample estimates:
##        cor 
## -0.1439279
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V6,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V6 and temp_plot_data$BPRS_TOT
## t = 1.8722, df = 167, p-value = 0.06293
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.007750369  0.288101692
## sample estimates:
##       cor 
## 0.1433774
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_load_correct_diff"]]$V6 and temp_plot_data$BPRS_TOT
## t = -0.69849, df = 110, p-value = 0.4863
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.2489373  0.1205917
## sample estimates:
##         cor 
## -0.06645102

Delay

There are no relationships at delay, though there is a trending correlation for the difference between correct and incorrect at high load trials and BPRS (excluding the outlier).

(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /  
  (plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]])  +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /  
  (plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]])  +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /  
  (plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]])  +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

cor.test(individual_trial_averages_probs[["low_correct"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_correct"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 0.50568, df = 167, p-value = 0.6137
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.1125234  0.1889456
## sample estimates:
##        cor 
## 0.03910086
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.1787, df = 167, p-value = 0.2402
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.0609662  0.2385193
## sample estimates:
##        cor 
## 0.09082977
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.8679, df = 110, p-value = 0.06444
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.01056428  0.34951954
## sample estimates:
##       cor 
## 0.1753352
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V8[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V8[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 1.9034, df = 166, p-value = 0.05873
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.005386177  0.291111040
## sample estimates:
##       cor 
## 0.1461428

Probe

Omnibus span is significantly negatively correlated with the probability of classification at incorrect low load trials, and positively correlated with the difference between correct and incorrect trials at low load.

Probability of classification at incorrect high load trials is significantly negatively correlated with BPRS at TR 11, and the difference between correct and incorrect trials at high load is significantly positively correlated with BPRS.

(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /  
  (plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]])  +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /  
  (plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]])  +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /  
  (plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]])  +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

cor.test(individual_trial_averages_probs[["low_incorrect"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_incorrect"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -2.8211, df = 110, p-value = 0.005679
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.42483128 -0.07795315
## sample estimates:
##        cor 
## -0.2597517
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_load_correct_diff"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 2.9138, df = 110, p-value = 0.004326
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.08641742 0.43178976
## sample estimates:
##       cor 
## 0.2676809
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["low_load_correct_diff"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 1.9161, df = 110, p-value = 0.05795
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.006037528  0.353487266
## sample estimates:
##       cor 
## 0.1797196
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_incorrect"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = -3.1733, df = 166, p-value = 0.001796
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.37691091 -0.09103542
## sample estimates:
##        cor 
## -0.2391489
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
## 
##  Pearson's product-moment correlation
## 
## data:  individual_trial_averages_probs[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 3.2122, df = 166, p-value = 0.001582
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.09393928 0.37942095
## sample estimates:
##       cor 
## 0.2419083
behav_classification_corr_list <- list()

# For each trial type, correlate each capacity group's (high/med/low) per-TR
# face-classification probabilities with three behavioral measures:
# omnibus span, high-load DFR accuracy, and BPRS total.
for (trial_type in seq.int(1, 6)) {
  trial_name <- names(split_indiv_probs[["all_data"]])[trial_type]
  group_names <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]

  # Preallocate one 3 (group) x 14 (TR) correlation frame per measure.
  make_corr_frame <- function() {
    out <- data.frame(matrix(nrow = 3, ncol = 14))
    rownames(out) <- group_names
    colnames(out) <- seq.int(1, 14)
    out
  }
  group_corrs_omnibus <- make_corr_frame()
  group_corrs_acc <- make_corr_frame()
  group_corrs_BPRS <- make_corr_frame()

  for (level in seq.int(1, 3)) {
    # Sort subjects by PTID so rows line up with the behavioral data subset.
    temp_subj <- split_indiv_probs[["all_data"]][[trial_type]][[level]][order(split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID), ]
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID, ]

    for (TR in seq.int(1, 14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }

  # Attach the group label once, after the level loop. (Originally this was
  # re-assigned on every level iteration, and the BPRS/omnibus frames
  # borrowed group_corrs_acc's rownames; the rownames are identical for all
  # three frames, so using each frame's own labels preserves the result.)
  group_corrs_omnibus$level <- factor(group_names)
  group_corrs_acc$level <- factor(group_names)
  group_corrs_BPRS$level <- factor(group_names)

  behav_classification_corr_list[["omnibus"]][[trial_name]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[trial_name]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[trial_name]] <- group_corrs_acc
}

By Working Memory Capacity

behav_classification_corr_melt <- list()
behav_split_plot_list <- list()

# Melt each measure x trial-type correlation frame to long format and draw
# one line plot per frame: TR on x, correlation on y, one line per group.
for (measure_name in names(behav_classification_corr_list)) {
  for (trial_name in names(behav_classification_corr_list[[measure_name]])) {
    melted <- melt(behav_classification_corr_list[[measure_name]][[trial_name]], id.vars = "level")
    # melt() leaves the TR column names as a factor; recover numeric TRs.
    melted$variable <- as.numeric(as.character(melted$variable))
    # Fix the legend order: high -> med -> low.
    melted$level <- factor(melted$level, levels = c("high", "med", "low"))
    behav_classification_corr_melt[[measure_name]][[trial_name]] <- melted

    behav_split_plot_list[[measure_name]][[trial_name]] <-
      ggplot(data = melted, aes(x = variable, y = value)) +
      geom_line(aes(color = level)) +
      scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
      ggtitle(trial_name) +
      xlab("TR") +
      ylab("Correlation") +
      theme_classic()
  }
}
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) / 
  (behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) + 
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) / 
  (behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) / 
  (behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

Template

Averaged over time

If we average over time, the only significant relationship is a negative correlation between high load accuracy and the difference between correct and incorrect trials at high load.

# Average each subject's classification probability over the 14 TRs for each
# condition. Only low_incorrect uses na.rm = TRUE — presumably some subjects
# have no low-load incorrect trials, leaving NA TRs; TODO confirm upstream.
template_avg_over_time <- data.frame(high_correct = rowMeans(averages_from_template[["high_correct"]][,1:14]),
                                     high_incorrect = rowMeans(averages_from_template[["high_incorrect"]][,1:14]),
                                     low_correct = rowMeans(averages_from_template[["low_correct"]][,1:14]),
                                     low_incorrect = rowMeans(averages_from_template[["low_incorrect"]][,1:14],na.rm=TRUE), 
                                     high_load_diff_correct = rowMeans(averages_from_template[["high_load_correct_diff"]][,1:14]),
                                     low_load_diff_correct = rowMeans(averages_from_template[["low_load_correct_diff"]][,1:14]))

# NOTE(review): the original line `template_avg_over_time[is.na(...)] <- NA`
# was a no-op (it replaced NA with NA) and has been removed.
# Row 10 of constructs_fMRI is skipped — presumably an excluded subject;
# TODO confirm against the exclusion criteria used when building the templates.
template_avg_over_time <- data.frame(template_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI[c(1:9,11:170)], PTID = constructs_fMRI$PTID[c(1:9,11:170)])
template_avg_over_time <- merge(template_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")


plot_list <- list()

# One scatter plot (time-averaged face probability vs behavioral measure,
# with a linear fit) per condition, for each of three measures:
# omnibus span, high-load DFR accuracy (XDFR_MRI_ACC_L3), and BPRS total.
for (i in seq.int(1,6)){
  # Column i+1 holds the i-th condition's time-averaged probability; columns
  # 8:11 are assumed to be omnibus_span, BPRS_TOT, XDFR_MRI_ACC_L3 and
  # XDFR_MRI_ACC_L1 after the merge above (PTID is column 1) — TODO confirm.
  plot_data <- template_avg_over_time[,c(i+1,8:11)]
  # Rename the condition column so one aes() works for every condition.
  colnames(plot_data)[1] <- "prob"
  plot_list[["omnibus"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=omnibus_span))+
    geom_point()+
    stat_smooth(method="lm")+
    xlab("Probability")+
    ggtitle(colnames(template_avg_over_time)[i+1])+
    theme_classic()
  
  plot_list[["DFR_Acc"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=XDFR_MRI_ACC_L3))+
    geom_point()+
    stat_smooth(method="lm")+
    xlab("Probability")+
    ggtitle(colnames(template_avg_over_time)[i+1])+
    theme_classic()
  
  plot_list[["BPRS"]][[i]] <- ggplot(data = plot_data,aes(x=prob,y=BPRS_TOT))+
    geom_point()+
    stat_smooth(method="lm")+
    xlab("Probability")+
    ggtitle(colnames(template_avg_over_time)[i+1])+
    theme_classic()
  
}

(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
  (plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
  (plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
  (plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) + 
  plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
  plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

cor.test(template_avg_over_time$high_correct, template_avg_over_time$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  template_avg_over_time$high_correct and template_avg_over_time$XDFR_MRI_ACC_L3
## t = -1.8229, df = 167, p-value = 0.0701
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.28463797  0.01152317
## sample estimates:
##        cor 
## -0.1396798
cor.test(template_avg_over_time$high_incorrect, template_avg_over_time$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  template_avg_over_time$high_incorrect and template_avg_over_time$XDFR_MRI_ACC_L3
## t = 1.5467, df = 167, p-value = 0.1238
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.03271045  0.26504169
## sample estimates:
##       cor 
## 0.1188364
cor.test(template_avg_over_time$high_load_diff_correct, template_avg_over_time$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  template_avg_over_time$high_load_diff_correct and template_avg_over_time$XDFR_MRI_ACC_L3
## t = -2.6299, df = 167, p-value = 0.00934
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.34013724 -0.04996067
## sample estimates:
##        cor 
## -0.1994167
cor.test(template_avg_over_time$low_load_diff_correct, template_avg_over_time$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  template_avg_over_time$low_load_diff_correct and template_avg_over_time$XDFR_MRI_ACC_L3
## t = 0.13788, df = 110, p-value = 0.8906
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.1728319  0.1982177
## sample estimates:
##        cor 
## 0.01314542

All subjects

Over time

# Combine span constructs, p200 behavioral data, and clinical z-scores by
# subject ID.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")

# Drop row 10 (presumably an excluded subject — TODO confirm) and keep PTID
# plus five behavioral columns. NOTE(review): positional column indices are
# fragile — they silently break if either merge's column order changes;
# selecting by name would be safer.
data_to_plot <- data_to_plot[c(1:9,11:170),c(1,7,14,15,41,42)]
# Load effect: high-load minus low-load accuracy.
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1

corr_to_behav_plots <- list()

# For each trial type, correlate every behavioral measure (columns 2-5 of
# data_to_plot) with the face-classification probability at each of the 14
# TRs, then plot the four correlation time courses together.
for (trial_idx in seq.int(1, 6)) {
  trial_name <- names(averages_from_template)[trial_idx]
  probs <- averages_from_template[[trial_idx]]

  # 4 measures x 14 TRs correlation matrix.
  corr_mat <- data.frame(matrix(nrow = 4, ncol = 14))
  for (col in seq.int(2, 5)) {
    for (tr in seq.int(1, 14)) {
      corr_mat[col - 1, tr] <- cor(data_to_plot[, col], probs[, tr], use = "pairwise.complete.obs")
    }
  }

  # Transpose to TR-rows, label columns with the measure names, melt long.
  corr_mat <- data.frame(t(corr_mat))
  corr_mat$TR <- seq.int(1, 14)
  colnames(corr_mat)[1:4] <- colnames(data_to_plot)[2:5]
  long_corrs <- melt(corr_mat, id.vars = "TR")

  corr_to_behav_plots[[trial_name]] <- ggplot(data = long_corrs, aes(x = TR, y = value)) +
    geom_line(aes(color = variable)) +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(trial_name) +
    theme_classic()
}
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
  plot_layout(guides="collect")+
  plot_annotation(title = "Correlation between face classification and behavioral measures")
## Warning: Removed 56 row(s) containing missing values (geom_path).

(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
  plot_layout(guides="collect")+
  plot_annotation(title = "Correlation between face classification and behavioral measures")

plot_list <- list()

# For each trial type, build scatter plots (with linear fits) of face
# classification probability against three behavioral measures, at three
# single-TR snapshots: V6 (cue/encoding period), V8 (delay period), and
# V11 (probe period) — the periods used by the section headers below.
for(trial_type in seq.int(1,6)){ 
  # Align template probabilities with behavioral data by subject ID.
  # NOTE(review): temp_plot_data survives the loop holding the LAST trial
  # type's merge and is reused by the cor.test() calls after this chunk.
  temp_plot_data <- merge(p200_data, averages_from_template[[trial_type]],by="PTID")
  temp_plot_data <- merge(temp_plot_data,constructs_fMRI,by="PTID")
  
  # Omnibus span
  plot_list[["omnibus"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=omnibus_span_no_DFR_MRI))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  plot_list[["omnibus"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=omnibus_span_no_DFR_MRI))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  plot_list[["omnibus"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=omnibus_span_no_DFR_MRI))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  # Acc
  
  plot_list[["L3_Acc"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=XDFR_MRI_ACC_L3))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  plot_list[["L3_Acc"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=XDFR_MRI_ACC_L3))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  plot_list[["L3_Acc"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=XDFR_MRI_ACC_L3))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  # BPRS  
  plot_list[["BPRS"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=BPRS_TOT))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  plot_list[["BPRS"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=BPRS_TOT))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  plot_list[["BPRS"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=BPRS_TOT))+
    geom_point()+
    stat_smooth(method="lm")+
    ylab("Probability")+
    ggtitle(names(averages_from_template)[trial_type])+
    theme_classic()
  
  
}

Encoding

We see positive relationships between accuracy and classification at incorrect high load trials and at correct low load trials.

(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /  
  (plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
  plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /  
  (plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
  plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /  
  (plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
  plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

cor.test(averages_from_template[["high_incorrect"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_incorrect"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.8552, df = 167, p-value = 0.06534
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.009054223  0.286905528
## sample estimates:
##    cor 
## 0.1421
cor.test(averages_from_template[["high_load_correct_diff"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_load_correct_diff"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -0.9027, df = 167, p-value = 0.368
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.21834621  0.08214106
## sample estimates:
##         cor 
## -0.06968304
cor.test(averages_from_template[["high_correct"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_correct"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 1.2589, df = 167, p-value = 0.2098
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.05480748  0.24433855
## sample estimates:
##        cor 
## 0.09695473
cor.test(averages_from_template[["high_incorrect"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_incorrect"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.0349, df = 167, p-value = 0.04344
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.00469966 0.29947712
## sample estimates:
##       cor 
## 0.1555493
cor.test(averages_from_template[["low_correct"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["low_correct"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 2.0216, df = 167, p-value = 0.04481
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.003682385 0.298550783
## sample estimates:
##       cor 
## 0.1545565

Delay

There is a significant negative correlation between accuracy at high load and face classification at high load correct trials during the delay period.

(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /  
  (plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
  plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /  
  (plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
  plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /  
  (plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
  plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

cor.test(averages_from_template[["high_correct"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_correct"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -1.9939, df = 167, p-value = 0.04779
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.296618483 -0.001562356
## sample estimates:
##        cor 
## -0.1524864

Probe

There are no significant relationships at probe.

(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /  
  (plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).
## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
  plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /  
  (plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
  plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /  
  (plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
  plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 57 rows containing non-finite values (stat_smooth).

## Warning: Removed 57 rows containing missing values (geom_point).

cor.test(averages_from_template[["high_correct"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_correct"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -1.4362, df = 167, p-value = 0.1528
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.25712685  0.04119269
## sample estimates:
##        cor 
## -0.1104542
cor.test(averages_from_template[["high_load_correct_diff"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_load_correct_diff"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -1.1638, df = 167, p-value = 0.2462
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.23744163  0.06210425
## sample estimates:
##         cor 
## -0.08969671
cor.test(averages_from_template[["high_incorrect"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_incorrect"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -1.2128, df = 167, p-value = 0.2269
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.24099779  0.05834592
## sample estimates:
##         cor 
## -0.09343709
cor.test(averages_from_template[["high_correct"]]$V11,temp_plot_data$BPRS_TOT)
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_correct"]]$V11 and temp_plot_data$BPRS_TOT
## t = 0.57403, df = 167, p-value = 0.5667
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.1073025  0.1940365
## sample estimates:
##        cor 
## 0.04437635
cor.test(averages_from_template[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
## 
##  Pearson's product-moment correlation
## 
## data:  averages_from_template[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 0.18383, df = 166, p-value = 0.8544
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  -0.1374401  0.1653193
## sample estimates:
##       cor 
## 0.0142666
behav_classification_corr_list <- list()

# Template analogue of the individual-trial analysis above: for each trial
# type, correlate each capacity group's per-TR template classification
# probabilities with omnibus span, high-load DFR accuracy, and BPRS total.
for (trial_type in seq.int(1, 6)) {
  trial_name <- names(split_template[["all_data"]])[trial_type]
  group_names <- names(split_template[["all_data"]][[trial_type]])[1:3]

  # Preallocate one 3 (group) x 14 (TR) correlation frame per measure.
  make_corr_frame <- function() {
    out <- data.frame(matrix(nrow = 3, ncol = 14))
    rownames(out) <- group_names
    colnames(out) <- seq.int(1, 14)
    out
  }
  group_corrs_omnibus <- make_corr_frame()
  group_corrs_acc <- make_corr_frame()
  group_corrs_BPRS <- make_corr_frame()

  for (level in seq.int(1, 3)) {
    # Sort subjects by PTID so rows line up with the behavioral data subset.
    temp_subj <- split_template[["all_data"]][[trial_type]][[level]][order(split_template[["all_data"]][[trial_type]][[level]]$PTID), ]
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_template[["all_data"]][[trial_type]][[level]]$PTID, ]

    for (TR in seq.int(1, 14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }

  # Attach the group label once, after the level loop. (Originally this was
  # re-assigned on every level iteration, and the BPRS/omnibus frames
  # borrowed group_corrs_acc's rownames; the rownames are identical for all
  # three frames, so using each frame's own labels preserves the result.)
  group_corrs_omnibus$level <- factor(group_names)
  group_corrs_acc$level <- factor(group_names)
  group_corrs_BPRS$level <- factor(group_names)

  behav_classification_corr_list[["omnibus"]][[trial_name]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[trial_name]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[trial_name]] <- group_corrs_acc
}

By Working Memory Capacity

# Reshape each group-by-TR correlation matrix to long format and build one
# line plot (correlation vs. TR, one colored line per capacity group) for
# every combination of behavioral measure (3) and trial type (6).
behav_classification_corr_melt <- list()
behav_split_plot_list <- list()

for (measure in seq.int(1, 3)) {
  measure_name <- names(behav_classification_corr_list)[measure]
  for (trial_type in seq.int(1, 6)) {
    trial_name <- names(behav_classification_corr_list[[measure]])[trial_type]

    # Long format: one row per (group level, TR) pair. The melted TR column
    # comes back as a factor, so round-trip through character to numeric.
    long_corrs <- melt(behav_classification_corr_list[[measure]][[trial_type]], id.vars = "level")
    long_corrs$variable <- as.numeric(as.character(long_corrs$variable))
    # Fix legend order: high on top, low on the bottom.
    long_corrs$level <- factor(long_corrs$level, levels = c("high", "med", "low"))
    behav_classification_corr_melt[[measure_name]][[trial_name]] <- long_corrs

    behav_split_plot_list[[measure_name]][[trial_name]] <-
      ggplot(data = long_corrs, aes(x = variable, y = value)) +
      geom_line(aes(color = level)) +
      scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
      ggtitle(trial_name) +
      xlab("TR") +
      ylab("Correlation") +
      theme_classic()
  }
}
# Omnibus span correlations for trial types 1-4, arranged as a 2x2 patchwork
# grid with a single shared legend.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) / 
  (behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).

# Omnibus span correlations for the remaining trial types 5-6, side by side
# with a shared legend.
(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) + 
  plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

# High-load (L3) accuracy correlations for trial types 1-4, 2x2 patchwork
# grid with a single shared legend.
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) / 
  (behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).

# High-load (L3) accuracy correlations for trial types 5-6, side by side
# with a shared legend.
(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) + 
  plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")

# BPRS total correlations for trial types 1-4, 2x2 patchwork grid with a
# single shared legend.
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) / 
  (behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")
## Warning: Removed 42 row(s) containing missing values (geom_path).

# BPRS total correlations for trial types 5-6, side by side with a shared
# legend. Title fixed to include "Correlation", matching the 2x2 BPRS grid
# above (the word was dropped in the original annotation).
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) + 
  plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
  plot_layout(guides="collect")